About the project

This is an R Markdown (.Rmd) file, so R Markdown syntax can be used. I am feeling great. I heard of this course from the University of Eastern Finland pages. I expect to use R and data science tools in my research.

Here is the link to my GitHub repository: https://github.com/mohanbabu29/IODS-project

And here is the link to my [diary page](https://mohanbabu29.github.io/IODS-project/).




Regression and model validation

This week we practised data wrangling, performed exploratory data analysis, and fitted a simple linear regression model to the data.

Reading data

Let’s read the data

library(dplyr)
learning2014 <- readxl::read_excel("~/IODS-project/data 2/learning2014.xlsx") %>%
  mutate_at(vars(gender), factor)
str(learning2014)
## Classes 'tbl_df', 'tbl' and 'data.frame':    166 obs. of  7 variables:
##  $ gender  : Factor w/ 2 levels "F","M": 1 2 1 2 2 1 2 1 2 1 ...
##  $ age     : num  53 55 49 53 49 38 50 37 37 42 ...
##  $ attitude: num  3.7 3.1 2.5 3.5 3.7 3.8 3.5 2.9 3.8 2.1 ...
##  $ deep    : num  3.58 2.92 3.5 3.5 3.67 ...
##  $ stra    : num  3.38 2.75 3.62 3.12 3.62 ...
##  $ surf    : num  2.58 3.17 2.25 2.25 2.83 ...
##  $ points  : num  25 12 24 10 22 21 21 31 24 26 ...

The data has 166 observations and 7 variables.

Most of the variables are Likert-scale (1-5) scores.

The data also includes age (a positive integer) and gender (a two-level factor: F and M).

Attitude = global attitude toward statistics (~ Da + Db + Dc + Dd + De + Df + Dg + Dh + Di + Dj)

Points = exam points (0-33)

dim(learning2014)
## [1] 166   7

library(ggplot2)

pairs(learning2014[!names(learning2014) %in% "gender"], col = learning2014$gender)

summary(learning2014)
##  gender       age           attitude          deep            stra      
##  F:110   Min.   :17.00   Min.   :1.400   Min.   :1.583   Min.   :1.250  
##  M: 56   1st Qu.:21.00   1st Qu.:2.600   1st Qu.:3.333   1st Qu.:2.625  
##          Median :22.00   Median :3.200   Median :3.667   Median :3.188  
##          Mean   :25.51   Mean   :3.143   Mean   :3.680   Mean   :3.121  
##          3rd Qu.:27.00   3rd Qu.:3.700   3rd Qu.:4.083   3rd Qu.:3.625  
##          Max.   :55.00   Max.   :5.000   Max.   :4.917   Max.   :5.000  
##       surf           points     
##  Min.   :1.583   Min.   : 7.00  
##  1st Qu.:2.417   1st Qu.:19.00  
##  Median :2.833   Median :23.00  
##  Mean   :2.787   Mean   :22.72  
##  3rd Qu.:3.167   3rd Qu.:27.75  
##  Max.   :4.333   Max.   :33.00
library(GGally)
library(ggplot2)

# create a more advanced plot matrix with ggpairs()
ggpairs(learning2014,
        mapping = aes(col = gender, alpha = 0.3),
        lower = list(combo = wrap("facethist", bins = 20)))

qplot(attitude, points, data = learning2014) + geom_smooth(method = "lm")

my_model <- lm(points ~ attitude, data = learning2014)

results <- summary(my_model)
knitr::kable(results$coefficients, digits=3, caption="Regression coefficients")
Regression coefficients

              Estimate  Std. Error  t value  Pr(>|t|)
(Intercept)     11.637       1.830    6.358         0
attitude         3.525       0.567    6.214         0
plot(my_model, which = c(1, 2, 5))  # Residuals vs Fitted, Normal Q-Q, Residuals vs Leverage



Reading data

Let’s read the data

library(dplyr)
alc <- read.table("http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/alc.txt", sep = ",", header = TRUE)
str(alc)
## 'data.frame':    382 obs. of  35 variables:
##  $ school    : Factor w/ 2 levels "GP","MS": 1 1 1 1 1 1 1 1 1 1 ...
##  $ sex       : Factor w/ 2 levels "F","M": 1 1 1 1 1 2 2 1 2 2 ...
##  $ age       : int  18 17 15 15 16 16 16 17 15 15 ...
##  $ address   : Factor w/ 2 levels "R","U": 2 2 2 2 2 2 2 2 2 2 ...
##  $ famsize   : Factor w/ 2 levels "GT3","LE3": 1 1 2 1 1 2 2 1 2 1 ...
##  $ Pstatus   : Factor w/ 2 levels "A","T": 1 2 2 2 2 2 2 1 1 2 ...
##  $ Medu      : int  4 1 1 4 3 4 2 4 3 3 ...
##  $ Fedu      : int  4 1 1 2 3 3 2 4 2 4 ...
##  $ Mjob      : Factor w/ 5 levels "at_home","health",..: 1 1 1 2 3 4 3 3 4 3 ...
##  $ Fjob      : Factor w/ 5 levels "at_home","health",..: 5 3 3 4 3 3 3 5 3 3 ...
##  $ reason    : Factor w/ 4 levels "course","home",..: 1 1 3 2 2 4 2 2 2 2 ...
##  $ nursery   : Factor w/ 2 levels "no","yes": 2 1 2 2 2 2 2 2 2 2 ...
##  $ internet  : Factor w/ 2 levels "no","yes": 1 2 2 2 1 2 2 1 2 2 ...
##  $ guardian  : Factor w/ 3 levels "father","mother",..: 2 1 2 2 1 2 2 2 2 2 ...
##  $ traveltime: int  2 1 1 1 1 1 1 2 1 1 ...
##  $ studytime : int  2 2 2 3 2 2 2 2 2 2 ...
##  $ failures  : int  0 0 3 0 0 0 0 0 0 0 ...
##  $ schoolsup : Factor w/ 2 levels "no","yes": 2 1 2 1 1 1 1 2 1 1 ...
##  $ famsup    : Factor w/ 2 levels "no","yes": 1 2 1 2 2 2 1 2 2 2 ...
##  $ paid      : Factor w/ 2 levels "no","yes": 1 1 2 2 2 2 1 1 2 2 ...
##  $ activities: Factor w/ 2 levels "no","yes": 1 1 1 2 1 2 1 1 1 2 ...
##  $ higher    : Factor w/ 2 levels "no","yes": 2 2 2 2 2 2 2 2 2 2 ...
##  $ romantic  : Factor w/ 2 levels "no","yes": 1 1 1 2 1 1 1 1 1 1 ...
##  $ famrel    : int  4 5 4 3 4 5 4 4 4 5 ...
##  $ freetime  : int  3 3 3 2 3 4 4 1 2 5 ...
##  $ goout     : int  4 3 2 2 2 2 4 4 2 1 ...
##  $ Dalc      : int  1 1 2 1 1 1 1 1 1 1 ...
##  $ Walc      : int  1 1 3 1 2 2 1 1 1 1 ...
##  $ health    : int  3 3 3 5 5 5 3 1 1 5 ...
##  $ absences  : int  6 4 10 2 4 10 0 6 0 0 ...
##  $ G1        : int  5 5 7 15 6 15 12 6 16 14 ...
##  $ G2        : int  6 5 8 14 10 15 12 5 18 15 ...
##  $ G3        : int  6 6 10 15 10 15 11 6 19 15 ...
##  $ alc_use   : num  1 1 2.5 1 1.5 1.5 1 1 1 1 ...
##  $ high_use  : logi  FALSE FALSE TRUE FALSE FALSE FALSE ...

There were 382 observations and 35 variables. Two datasets on student performance in two distinct subjects, Mathematics (mat) and Portuguese language (por), were joined using the variables "school", "sex", "age", "address", "famsize", "Pstatus", "Medu", "Fedu", "Mjob", "Fjob", "reason", "nursery" and "internet" as (student) identifiers; a sketch of the join follows below. The relationships between the variables in the joined data and their relation to alcohol consumption were then evaluated.
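
A minimal sketch of such a join, assuming the two source files have already been read into data frames named math and por (hypothetical names; the actual wrangling was done in a separate script):

library(dplyr)
join_by <- c("school", "sex", "age", "address", "famsize", "Pstatus", "Medu",
             "Fedu", "Mjob", "Fjob", "reason", "nursery", "internet")
# keep only the students present in both datasets
math_por <- inner_join(math, por, by = join_by, suffix = c(".math", ".por"))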

# access the tidyverse libraries tidyr, dplyr, ggplot2
library(tidyr); library(dplyr); library(ggplot2)

alc <- mutate(alc, high_use = alc_use > 2)
glimpse(alc)
## Observations: 382
## Variables: 35
## $ school     <fct> GP, GP, GP, GP, GP, GP, GP, GP, GP, GP, GP, GP, GP,...
## $ sex        <fct> F, F, F, F, F, M, M, F, M, M, F, F, M, M, M, F, F, ...
## $ age        <int> 18, 17, 15, 15, 16, 16, 16, 17, 15, 15, 15, 15, 15,...
## $ address    <fct> U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, ...
## $ famsize    <fct> GT3, GT3, LE3, GT3, GT3, LE3, LE3, GT3, LE3, GT3, G...
## $ Pstatus    <fct> A, T, T, T, T, T, T, A, A, T, T, T, T, T, A, T, T, ...
## $ Medu       <int> 4, 1, 1, 4, 3, 4, 2, 4, 3, 3, 4, 2, 4, 4, 2, 4, 4, ...
## $ Fedu       <int> 4, 1, 1, 2, 3, 3, 2, 4, 2, 4, 4, 1, 4, 3, 2, 4, 4, ...
## $ Mjob       <fct> at_home, at_home, at_home, health, other, services,...
## $ Fjob       <fct> teacher, other, other, services, other, other, othe...
## $ reason     <fct> course, course, other, home, home, reputation, home...
## $ nursery    <fct> yes, no, yes, yes, yes, yes, yes, yes, yes, yes, ye...
## $ internet   <fct> no, yes, yes, yes, no, yes, yes, no, yes, yes, yes,...
## $ guardian   <fct> mother, father, mother, mother, father, mother, mot...
## $ traveltime <int> 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 3, 1, 2, 1, 1, 1, ...
## $ studytime  <int> 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 3, 1, 2, 3, 1, 3, ...
## $ failures   <int> 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
## $ schoolsup  <fct> yes, no, yes, no, no, no, no, yes, no, no, no, no, ...
## $ famsup     <fct> no, yes, no, yes, yes, yes, no, yes, yes, yes, yes,...
## $ paid       <fct> no, no, yes, yes, yes, yes, no, no, yes, yes, yes, ...
## $ activities <fct> no, no, no, yes, no, yes, no, no, no, yes, no, yes,...
## $ higher     <fct> yes, yes, yes, yes, yes, yes, yes, yes, yes, yes, y...
## $ romantic   <fct> no, no, no, yes, no, no, no, no, no, no, no, no, no...
## $ famrel     <int> 4, 5, 4, 3, 4, 5, 4, 4, 4, 5, 3, 5, 4, 5, 4, 4, 3, ...
## $ freetime   <int> 3, 3, 3, 2, 3, 4, 4, 1, 2, 5, 3, 2, 3, 4, 5, 4, 2, ...
## $ goout      <int> 4, 3, 2, 2, 2, 2, 4, 4, 2, 1, 3, 2, 3, 3, 2, 4, 3, ...
## $ Dalc       <int> 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
## $ Walc       <int> 1, 1, 3, 1, 2, 2, 1, 1, 1, 1, 2, 1, 3, 2, 1, 2, 2, ...
## $ health     <int> 3, 3, 3, 5, 5, 5, 3, 1, 1, 5, 2, 4, 5, 3, 3, 2, 2, ...
## $ absences   <int> 6, 4, 10, 2, 4, 10, 0, 6, 0, 0, 0, 4, 2, 2, 0, 4, 6...
## $ G1         <int> 5, 5, 7, 15, 6, 15, 12, 6, 16, 14, 10, 10, 14, 10, ...
## $ G2         <int> 6, 5, 8, 14, 10, 15, 12, 5, 18, 15, 8, 12, 14, 10, ...
## $ G3         <int> 6, 6, 10, 15, 10, 15, 11, 6, 19, 15, 9, 12, 14, 11,...
## $ alc_use    <dbl> 1.0, 1.0, 2.5, 1.0, 1.5, 1.5, 1.0, 1.0, 1.0, 1.0, 1...
## $ high_use   <lgl> FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FAL...
alc %>% group_by(alc_use,age) %>% summarise(count = n())
## # A tibble: 41 x 3
## # Groups:   alc_use [9]
##    alc_use   age count
##      <dbl> <int> <int>
##  1     1      15    46
##  2     1      16    41
##  3     1      17    26
##  4     1      18    27
##  5     1      19     3
##  6     1      20     1
##  7     1.5    15     9
##  8     1.5    16    24
##  9     1.5    17    20
## 10     1.5    18    12
## # ... with 31 more rows
alc %>% group_by(alc_use,sex) %>% summarise(count = n())
## # A tibble: 17 x 3
## # Groups:   alc_use [9]
##    alc_use sex   count
##      <dbl> <fct> <int>
##  1     1   F        89
##  2     1   M        55
##  3     1.5 F        41
##  4     1.5 M        27
##  5     2   F        27
##  6     2   M        31
##  7     2.5 F        25
##  8     2.5 M        17
##  9     3   F        11
## 10     3   M        21
## 11     3.5 F         3
## 12     3.5 M        14
## 13     4   F         1
## 14     4   M         8
## 15     4.5 M         3
## 16     5   F         1
## 17     5   M         8
alc %>% group_by(alc_use,Medu) %>% summarise(count = n())
## # A tibble: 37 x 3
## # Groups:   alc_use [9]
##    alc_use  Medu count
##      <dbl> <int> <int>
##  1     1       0     1
##  2     1       1    18
##  3     1       2    41
##  4     1       3    33
##  5     1       4    51
##  6     1.5     1    10
##  7     1.5     2    22
##  8     1.5     3    12
##  9     1.5     4    24
## 10     2       1     5
## # ... with 27 more rows
alc %>% group_by(alc_use,Fedu) %>% summarise(count = n())
## # A tibble: 35 x 3
## # Groups:   alc_use [9]
##    alc_use  Fedu count
##      <dbl> <int> <int>
##  1     1       0     2
##  2     1       1    28
##  3     1       2    42
##  4     1       3    38
##  5     1       4    34
##  6     1.5     1    16
##  7     1.5     2    15
##  8     1.5     3    19
##  9     1.5     4    18
## 10     2       1     9
## # ... with 25 more rows
# a grouped tibble cannot be piped into boxplot() directly; use a formula instead
boxplot(age ~ high_use, data = alc)

High alcohol consumption seems to correlate with poor grades in both sexes.

High alcohol consumption seems to be correlated with more absences in both sexes.

Students' free time does not seem to influence alcohol consumption.

The age of the students does not seem to correlate with high alcohol consumption. These impressions can be checked with boxplots, as sketched below.
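
A minimal sketch of such checks with ggplot2 (the variable choices are illustrative):

ggplot(alc, aes(x = high_use, y = G3, col = sex)) + geom_boxplot() + ylab("final grade")
ggplot(alc, aes(x = high_use, y = absences, col = sex)) + geom_boxplot() + ylab("absences")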

m <- glm(high_use ~ failures + absences + sex, data = alc, family = "binomial")
summary(m)
## 
## Call:
## glm(formula = high_use ~ failures + absences + sex, family = "binomial", 
##     data = alc)
## 
## Deviance Residuals: 
##     Min       1Q   Median       3Q      Max  
## -2.6629  -0.8545  -0.5894   1.0339   2.0428  
## 
## Coefficients:
##             Estimate Std. Error z value Pr(>|z|)    
## (Intercept) -1.95397    0.22819  -8.563  < 2e-16 ***
## failures     0.40462    0.15024   2.693  0.00708 ** 
## absences     0.07294    0.01796   4.061 4.88e-05 ***
## sexM         0.98848    0.24453   4.042 5.29e-05 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## (Dispersion parameter for binomial family taken to be 1)
## 
##     Null deviance: 462.21  on 381  degrees of freedom
## Residual deviance: 418.64  on 378  degrees of freedom
## AIC: 426.64
## 
## Number of Fisher Scoring iterations: 4
coef(m)
## (Intercept)    failures    absences        sexM 
## -1.95396790  0.40461608  0.07293654  0.98847614
OR <- coef(m) %>% exp
CI <- confint(m) %>% exp
## Waiting for profiling to be done...
cbind(OR, CI)
##                    OR      2.5 %    97.5 %
## (Intercept) 0.1417107 0.08883883 0.2178283
## failures    1.4987270 1.11549818 2.0187171
## absences    1.0756623 1.04072883 1.1163576
## sexM        2.6871365 1.67434331 4.3755694

For every one-unit increase in absences, the log-odds of high alcohol consumption increase by 0.073, which is highly significant.

For every one-unit increase in failures, the log-odds of high alcohol consumption increase by 0.405, which is also significant.

Compared to females, males have higher odds of high alcohol consumption (OR 2.687).

On the odds-ratio scale, every one-unit increase in failures multiplies the odds of high alcohol consumption by 1.499.

Every one-unit increase in absences multiplies the odds of high alcohol consumption by 1.076.
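
The odds ratios are simply the exponentiated model coefficients, e.g.:

exp(0.40462)  # odds ratio for failures, ~1.499
exp(0.07294)  # odds ratio for absences, ~1.076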

probabilities <- predict(m, type = "response")
alc <- mutate(alc, probability = probabilities)
alc <- mutate(alc, prediction = probability > 0.5)
select(alc, failures, absences, sex, high_use, probability, prediction) %>% tail(10)
##     failures absences sex high_use probability prediction
## 373        1        0   M    FALSE   0.3633449      FALSE
## 374        1       14   M     TRUE   0.6130701       TRUE
## 375        0        2   F    FALSE   0.1408685      FALSE
## 376        0        7   F    FALSE   0.1910175      FALSE
## 377        1        0   F    FALSE   0.1751799      FALSE
## 378        0        0   F    FALSE   0.1241213      FALSE
## 379        1        0   F    FALSE   0.1751799      FALSE
## 380        1        0   F    FALSE   0.1751799      FALSE
## 381        0        3   M     TRUE   0.3215447      FALSE
## 382        0        0   M     TRUE   0.2757800      FALSE
table(high_use = alc$high_use, prediction = alc$prediction)
##         prediction
## high_use FALSE TRUE
##    FALSE   258   12
##    TRUE     86   26
table(high_use = alc$high_use, prediction = alc$prediction) %>% prop.table %>% addmargins
##         prediction
## high_use      FALSE       TRUE        Sum
##    FALSE 0.67539267 0.03141361 0.70680628
##    TRUE  0.22513089 0.06806283 0.29319372
##    Sum   0.90052356 0.09947644 1.00000000
# loss function: mean prediction error
loss_func <- function(class, prob) {
  # a prediction is wrong when the predicted probability is on the wrong
  # side of 0.5 relative to the true class (TRUE = 1, FALSE = 0)
  n_wrong <- abs(class - prob) > 0.5
  mean(n_wrong)
}
loss_func(class = alc$high_use, prob = alc$probability)
## [1] 0.2565445

Roughly every fourth model prediction of high alcohol consumption is wrong (training error ~0.26).
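
As a sanity check, the same loss function can be compared with a trivial strategy that never predicts high use (a sketch):

loss_func(class = alc$high_use, prob = 0)  # error of always predicting FALSE, i.e. the share of high users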

library(boot)
cv <- cv.glm(data = alc, cost = loss_func, glmfit = m, K = 10)
cv$delta[1]  # average prediction error in 10-fold cross-validation
## [1] 0.2565445


# Chapter 4 exercise, Mohan Babu, 25.11.2019


1. Loading the Boston data from MASS package

library(MASS)
## 
## Attaching package: 'MASS'
## The following object is masked from 'package:dplyr':
## 
##     select
str(Boston)
## 'data.frame':    506 obs. of  14 variables:
##  $ crim   : num  0.00632 0.02731 0.02729 0.03237 0.06905 ...
##  $ zn     : num  18 0 0 0 0 0 12.5 12.5 12.5 12.5 ...
##  $ indus  : num  2.31 7.07 7.07 2.18 2.18 2.18 7.87 7.87 7.87 7.87 ...
##  $ chas   : int  0 0 0 0 0 0 0 0 0 0 ...
##  $ nox    : num  0.538 0.469 0.469 0.458 0.458 0.458 0.524 0.524 0.524 0.524 ...
##  $ rm     : num  6.58 6.42 7.18 7 7.15 ...
##  $ age    : num  65.2 78.9 61.1 45.8 54.2 58.7 66.6 96.1 100 85.9 ...
##  $ dis    : num  4.09 4.97 4.97 6.06 6.06 ...
##  $ rad    : int  1 2 2 3 3 3 5 5 5 5 ...
##  $ tax    : num  296 242 242 222 222 222 311 311 311 311 ...
##  $ ptratio: num  15.3 17.8 17.8 18.7 18.7 18.7 15.2 15.2 15.2 15.2 ...
##  $ black  : num  397 397 393 395 397 ...
##  $ lstat  : num  4.98 9.14 4.03 2.94 5.33 ...
##  $ medv   : num  24 21.6 34.7 33.4 36.2 28.7 22.9 27.1 16.5 18.9 ...
dim(Boston)
## [1] 506  14

The Boston dataset describes housing values in the suburbs of Boston; the data frame has 506 rows and 14 columns (variables).

3. Graphical overview and summary of the Boston data

library(ggplot2)
summary(Boston)

##       crim                zn             indus            chas        
##  Min.   : 0.00632   Min.   :  0.00   Min.   : 0.46   Min.   :0.00000  
##  1st Qu.: 0.08204   1st Qu.:  0.00   1st Qu.: 5.19   1st Qu.:0.00000  
##  Median : 0.25651   Median :  0.00   Median : 9.69   Median :0.00000  
##  Mean   : 3.61352   Mean   : 11.36   Mean   :11.14   Mean   :0.06917  
##  3rd Qu.: 3.67708   3rd Qu.: 12.50   3rd Qu.:18.10   3rd Qu.:0.00000  
##  Max.   :88.97620   Max.   :100.00   Max.   :27.74   Max.   :1.00000  
##       nox               rm             age              dis        
##  Min.   :0.3850   Min.   :3.561   Min.   :  2.90   Min.   : 1.130  
##  1st Qu.:0.4490   1st Qu.:5.886   1st Qu.: 45.02   1st Qu.: 2.100  
##  Median :0.5380   Median :6.208   Median : 77.50   Median : 3.207  
##  Mean   :0.5547   Mean   :6.285   Mean   : 68.57   Mean   : 3.795  
##  3rd Qu.:0.6240   3rd Qu.:6.623   3rd Qu.: 94.08   3rd Qu.: 5.188  
##  Max.   :0.8710   Max.   :8.780   Max.   :100.00   Max.   :12.127  
##       rad              tax           ptratio          black       
##  Min.   : 1.000   Min.   :187.0   Min.   :12.60   Min.   :  0.32  
##  1st Qu.: 4.000   1st Qu.:279.0   1st Qu.:17.40   1st Qu.:375.38  
##  Median : 5.000   Median :330.0   Median :19.05   Median :391.44  
##  Mean   : 9.549   Mean   :408.2   Mean   :18.46   Mean   :356.67  
##  3rd Qu.:24.000   3rd Qu.:666.0   3rd Qu.:20.20   3rd Qu.:396.23  
##  Max.   :24.000   Max.   :711.0   Max.   :22.00   Max.   :396.90  
##      lstat            medv      
##  Min.   : 1.73   Min.   : 5.00  
##  1st Qu.: 6.95   1st Qu.:17.02  
##  Median :11.36   Median :21.20  
##  Mean   :12.65   Mean   :22.53  
##  3rd Qu.:16.95   3rd Qu.:25.00  
##  Max.   :37.97   Max.   :50.00

3. Correlation matrix of the Boston data

cor_matrix <- cor(Boston)

4. Visualizing the correlation matrix

corrplot::corrplot(cor_matrix, method="circle", type="upper", cl.pos="b", tl.pos="d", tl.cex = 0.6)

The crime rate is strongly correlated with the index of accessibility to radial highways (rad); the exact value can be read from the correlation matrix, as below.
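
A quick check of that entry (cor_matrix was computed above):

cor_matrix["crim", "rad"]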

4. Scaling the dataset and summary of the scaled data

boston_scaled <- scale(Boston)
summary(boston_scaled)
##       crim                 zn               indus        
##  Min.   :-0.419367   Min.   :-0.48724   Min.   :-1.5563  
##  1st Qu.:-0.410563   1st Qu.:-0.48724   1st Qu.:-0.8668  
##  Median :-0.390280   Median :-0.48724   Median :-0.2109  
##  Mean   : 0.000000   Mean   : 0.00000   Mean   : 0.0000  
##  3rd Qu.: 0.007389   3rd Qu.: 0.04872   3rd Qu.: 1.0150  
##  Max.   : 9.924110   Max.   : 3.80047   Max.   : 2.4202  
##       chas              nox                rm               age         
##  Min.   :-0.2723   Min.   :-1.4644   Min.   :-3.8764   Min.   :-2.3331  
##  1st Qu.:-0.2723   1st Qu.:-0.9121   1st Qu.:-0.5681   1st Qu.:-0.8366  
##  Median :-0.2723   Median :-0.1441   Median :-0.1084   Median : 0.3171  
##  Mean   : 0.0000   Mean   : 0.0000   Mean   : 0.0000   Mean   : 0.0000  
##  3rd Qu.:-0.2723   3rd Qu.: 0.5981   3rd Qu.: 0.4823   3rd Qu.: 0.9059  
##  Max.   : 3.6648   Max.   : 2.7296   Max.   : 3.5515   Max.   : 1.1164  
##       dis               rad               tax             ptratio       
##  Min.   :-1.2658   Min.   :-0.9819   Min.   :-1.3127   Min.   :-2.7047  
##  1st Qu.:-0.8049   1st Qu.:-0.6373   1st Qu.:-0.7668   1st Qu.:-0.4876  
##  Median :-0.2790   Median :-0.5225   Median :-0.4642   Median : 0.2746  
##  Mean   : 0.0000   Mean   : 0.0000   Mean   : 0.0000   Mean   : 0.0000  
##  3rd Qu.: 0.6617   3rd Qu.: 1.6596   3rd Qu.: 1.5294   3rd Qu.: 0.8058  
##  Max.   : 3.9566   Max.   : 1.6596   Max.   : 1.7964   Max.   : 1.6372  
##      black             lstat              medv        
##  Min.   :-3.9033   Min.   :-1.5296   Min.   :-1.9063  
##  1st Qu.: 0.2049   1st Qu.:-0.7986   1st Qu.:-0.5989  
##  Median : 0.3808   Median :-0.1811   Median :-0.1449  
##  Mean   : 0.0000   Mean   : 0.0000   Mean   : 0.0000  
##  3rd Qu.: 0.4332   3rd Qu.: 0.6024   3rd Qu.: 0.2683  
##  Max.   : 0.4406   Max.   : 3.5453   Max.   : 2.9865

4. Creating a quantile vector of the crime rate

class(boston_scaled)
## [1] "matrix"
boston_scaled<-as.data.frame(boston_scaled)
summary(boston_scaled$crim)
##      Min.   1st Qu.    Median      Mean   3rd Qu.      Max. 
## -0.419367 -0.410563 -0.390280  0.000000  0.007389  9.924110
bins<-quantile(boston_scaled$crim)
print(bins)
##           0%          25%          50%          75%         100% 
## -0.419366929 -0.410563278 -0.390280295  0.007389247  9.924109610

4. Creating a categorical variable of the crime rate

# pass the class labels via the `labels` argument (not with `label<-`)
crime <- cut(boston_scaled$crim, breaks = bins, include.lowest = TRUE,
             labels = c("low", "med_low", "med_high", "high"))
table(crime)
## crime
##      low  med_low med_high     high 
##      127      126      126      127

4. Dropping the old crime variable

boston_scaled <- dplyr::select(boston_scaled, -crim)

4. Adding the new categorical variable to the scaled data

boston_scaled <- data.frame(boston_scaled, crime)

4. Dividing the dataset into train and test sets

n <- nrow(boston_scaled)

4. Randomly choosing 80% of the rows for the train set

ind <- sample(n, size = n * 0.8)

4. Creating the train set

train <- boston_scaled[ind,]

4. Creating test set

test <- boston_scaled[-ind,]

4. Saving the correct classes from the test data

correct_classes <- test$crime

4. Removing the crime variable from the test data

test <- dplyr::select(test, -crime)

5. Fitting the linear discriminant analysis on the train set

lda.fit <- lda(crime~zn+indus+chas+nox+rm+age+dis+rad+tax+ptratio+black+lstat+medv, data = train)
lda.fit
## Call:
## lda(crime ~ zn + indus + chas + nox + rm + age + dis + rad + 
##     tax + ptratio + black + lstat + medv, data = train)
## 
## Prior probabilities of groups:
##       low   med_low  med_high      high 
## 0.2698020 0.2277228 0.2450495 0.2574257 
## 
## Group means:
##                  zn      indus        chas        nox          rm
## low       0.9290821 -0.9043974 -0.12784833 -0.8811777  0.45136424
## med_low  -0.0449532 -0.3522661 -0.01556166 -0.6080292 -0.14858057
## med_high -0.4075493  0.1798342  0.20489520  0.3998634  0.07224354
## high     -0.4872402  1.0170690 -0.08304540  1.0894043 -0.52326309
##                 age        dis        rad        tax    ptratio
## low      -0.8799431  0.8757234 -0.6836912 -0.7390553 -0.4545029
## med_low  -0.3811610  0.4085716 -0.5412094 -0.4611175 -0.1295800
## med_high  0.4411915 -0.3657406 -0.4435998 -0.3185150 -0.2500720
## high      0.8155168 -0.8564699  1.6386213  1.5144083  0.7813507
##                black       lstat        medv
## low       0.37709554 -0.76762917  0.52591755
## med_low   0.31030831 -0.11933711  0.01345156
## med_high  0.05261903  0.05490616  0.13360834
## high     -0.83537529  0.92160860 -0.77847650
## 
## Coefficients of linear discriminants:
##                 LD1         LD2        LD3
## zn       0.08953054  0.56113425 -1.0101364
## indus    0.07218634 -0.22174594  0.2397393
## chas    -0.09531663 -0.05996422  0.1169146
## nox      0.27621683 -0.79385816 -1.3189436
## rm      -0.13342457 -0.11478356 -0.2125507
## age      0.22966338 -0.41609631 -0.1325351
## dis     -0.02814771 -0.22710599  0.2165613
## rad      3.59273354  1.08337388 -0.2786836
## tax      0.18626877 -0.09533914  0.8325966
## ptratio  0.10340365 -0.04811054 -0.3811377
## black   -0.09074088  0.06197170  0.1196124
## lstat    0.21828270 -0.22692557  0.4064369
## medv     0.19223067 -0.42219229 -0.1267976
## 
## Proportion of trace:
##    LD1    LD2    LD3 
## 0.9580 0.0322 0.0098

5. Drawing the LDA (bi)plot

lda.arrows <- function(x, myscale = 1, arrow_heads = 0.1, color = "red", tex = 0.75, choices = c(1,2)){
  heads <- coef(x)
  arrows(x0 = 0, y0 = 0, 
         x1 = myscale * heads[,choices[1]], 
         y1 = myscale * heads[,choices[2]], col=color, length = arrow_heads)
  text(myscale * heads[,choices], labels = row.names(heads), 
       cex = tex, col=color, pos=3)
}
classes<-as.numeric(train$crime)
plot(lda.fit, col=classes, dimen = 2)
lda.arrows(lda.fit, myscale = 1)

6. Predicting the classes with the LDA model on the test set

lda.pred <- predict(lda.fit, newdata = test)
table(correct = correct_classes, predicted = lda.pred$class)
##           predicted
## correct    low med_low med_high high
##   low       14       3        1    0
##   med_low    7      18        9    0
##   med_high   1       7       16    3
##   high       0       0        0   23

The classifier seems to predict the crime rate classes quite well; most of the misclassifications fall into neighbouring classes. The overall accuracy can be checked as below.
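
A quick accuracy check against the saved correct classes (a sketch):

mean(correct_classes == lda.pred$class)  # share of correct test-set predictions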

7. Reloading the Boston data and computing the distance matrix

summary(Boston)
##       crim                zn             indus            chas        
##  Min.   : 0.00632   Min.   :  0.00   Min.   : 0.46   Min.   :0.00000  
##  1st Qu.: 0.08204   1st Qu.:  0.00   1st Qu.: 5.19   1st Qu.:0.00000  
##  Median : 0.25651   Median :  0.00   Median : 9.69   Median :0.00000  
##  Mean   : 3.61352   Mean   : 11.36   Mean   :11.14   Mean   :0.06917  
##  3rd Qu.: 3.67708   3rd Qu.: 12.50   3rd Qu.:18.10   3rd Qu.:0.00000  
##  Max.   :88.97620   Max.   :100.00   Max.   :27.74   Max.   :1.00000  
##       nox               rm             age              dis        
##  Min.   :0.3850   Min.   :3.561   Min.   :  2.90   Min.   : 1.130  
##  1st Qu.:0.4490   1st Qu.:5.886   1st Qu.: 45.02   1st Qu.: 2.100  
##  Median :0.5380   Median :6.208   Median : 77.50   Median : 3.207  
##  Mean   :0.5547   Mean   :6.285   Mean   : 68.57   Mean   : 3.795  
##  3rd Qu.:0.6240   3rd Qu.:6.623   3rd Qu.: 94.08   3rd Qu.: 5.188  
##  Max.   :0.8710   Max.   :8.780   Max.   :100.00   Max.   :12.127  
##       rad              tax           ptratio          black       
##  Min.   : 1.000   Min.   :187.0   Min.   :12.60   Min.   :  0.32  
##  1st Qu.: 4.000   1st Qu.:279.0   1st Qu.:17.40   1st Qu.:375.38  
##  Median : 5.000   Median :330.0   Median :19.05   Median :391.44  
##  Mean   : 9.549   Mean   :408.2   Mean   :18.46   Mean   :356.67  
##  3rd Qu.:24.000   3rd Qu.:666.0   3rd Qu.:20.20   3rd Qu.:396.23  
##  Max.   :24.000   Max.   :711.0   Max.   :22.00   Max.   :396.90  
##      lstat            medv      
##  Min.   : 1.73   Min.   : 5.00  
##  1st Qu.: 6.95   1st Qu.:17.02  
##  Median :11.36   Median :21.20  
##  Mean   :12.65   Mean   :22.53  
##  3rd Qu.:16.95   3rd Qu.:25.00  
##  Max.   :37.97   Max.   :50.00
class(Boston)
## [1] "data.frame"
dist_eu <- dist(Boston)  # Euclidean distance matrix between the observations
summary(dist_eu)

7. Running the k-means algorithm and investigating the optimal number of clusters

set.seed(123)
k_max <- 10
# total within-cluster sum of squares (WCSS) for k = 1..10
twcss <- sapply(1:k_max, function(k){kmeans(Boston, k)$tot.withinss})
km <- kmeans(Boston, centers = 2)
pairs(Boston[1:5], col = km$cluster)
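
The drop in the total WCSS suggests the number of clusters; a minimal sketch of that check (qplot comes from ggplot2, which is loaded above):

qplot(x = 1:k_max, y = twcss, geom = "line")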

The optimal number of clusters seems to be 2. The crime rate also seems to be highly correlated with nox.

Bonus exercise

Performing k-means on the original Boston data

km <- kmeans(Boston, centers = 3)
pairs(Boston[1:5], col = km$cluster)

# Scaling original Boston data

boston_scaled <- scale(Boston)
summary(boston_scaled)
##       crim                 zn               indus        
##  Min.   :-0.419367   Min.   :-0.48724   Min.   :-1.5563  
##  1st Qu.:-0.410563   1st Qu.:-0.48724   1st Qu.:-0.8668  
##  Median :-0.390280   Median :-0.48724   Median :-0.2109  
##  Mean   : 0.000000   Mean   : 0.00000   Mean   : 0.0000  
##  3rd Qu.: 0.007389   3rd Qu.: 0.04872   3rd Qu.: 1.0150  
##  Max.   : 9.924110   Max.   : 3.80047   Max.   : 2.4202  
##       chas              nox                rm               age         
##  Min.   :-0.2723   Min.   :-1.4644   Min.   :-3.8764   Min.   :-2.3331  
##  1st Qu.:-0.2723   1st Qu.:-0.9121   1st Qu.:-0.5681   1st Qu.:-0.8366  
##  Median :-0.2723   Median :-0.1441   Median :-0.1084   Median : 0.3171  
##  Mean   : 0.0000   Mean   : 0.0000   Mean   : 0.0000   Mean   : 0.0000  
##  3rd Qu.:-0.2723   3rd Qu.: 0.5981   3rd Qu.: 0.4823   3rd Qu.: 0.9059  
##  Max.   : 3.6648   Max.   : 2.7296   Max.   : 3.5515   Max.   : 1.1164  
##       dis               rad               tax             ptratio       
##  Min.   :-1.2658   Min.   :-0.9819   Min.   :-1.3127   Min.   :-2.7047  
##  1st Qu.:-0.8049   1st Qu.:-0.6373   1st Qu.:-0.7668   1st Qu.:-0.4876  
##  Median :-0.2790   Median :-0.5225   Median :-0.4642   Median : 0.2746  
##  Mean   : 0.0000   Mean   : 0.0000   Mean   : 0.0000   Mean   : 0.0000  
##  3rd Qu.: 0.6617   3rd Qu.: 1.6596   3rd Qu.: 1.5294   3rd Qu.: 0.8058  
##  Max.   : 3.9566   Max.   : 1.6596   Max.   : 1.7964   Max.   : 1.6372  
##      black             lstat              medv        
##  Min.   :-3.9033   Min.   :-1.5296   Min.   :-1.9063  
##  1st Qu.: 0.2049   1st Qu.:-0.7986   1st Qu.:-0.5989  
##  Median : 0.3808   Median :-0.1811   Median :-0.1449  
##  Mean   : 0.0000   Mean   : 0.0000   Mean   : 0.0000  
##  3rd Qu.: 0.4332   3rd Qu.: 0.6024   3rd Qu.: 0.2683  
##  Max.   : 0.4406   Max.   : 3.5453   Max.   : 2.9865
class(boston_scaled)
## [1] "matrix"
boston_scaled<-as.data.frame(boston_scaled)

Fitting linear discriminant analysis on the scaled Boston data, with the k-means clusters as the target classes

lda.fit <- lda(km$cluster~ ., data = boston_scaled)
lda.fit
## Call:
## lda(km$cluster ~ ., data = boston_scaled)
## 
## Prior probabilities of groups:
##         1         2         3 
## 0.5296443 0.1996047 0.2707510 
## 
## Group means:
##         crim          zn      indus          chas        nox         rm
## 1 -0.3920779  0.27670879 -0.6513071  0.0214843827 -0.6152775  0.2573427
## 2 -0.3293317 -0.07332724  0.2818828  0.0005392655  0.2816899 -0.1453417
## 3  1.0097765 -0.48724019  1.0662784 -0.0424254043  0.9959393 -0.3962652
##          age        dis        rad         tax    ptratio       black
## 1 -0.4572006  0.5121870 -0.6013344 -0.78136288 -0.2690134  0.34109296
## 2  0.1822823 -0.2378455 -0.5418150 -0.01444889 -0.3768823  0.07010933
## 3  0.7599946 -0.8265965  1.5757732  1.53915759  0.8040926 -0.71893398
##         lstat        medv
## 1 -0.43621538  0.36234147
## 2  0.01371321 -0.03812375
## 3  0.84321670 -0.68070813
## 
## Coefficients of linear discriminants:
##                  LD1         LD2
## crim     0.048210477  0.05079118
## zn       0.253528315  0.06311589
## indus    0.369497254  0.12674727
## chas    -0.047064817  0.01998369
## nox     -0.063156250 -0.49621758
## rm      -0.005144383  0.09537352
## age     -0.118710969  0.05412142
## dis     -0.385151599  0.17969944
## rad      1.996321584  3.05733525
## tax      4.535785039 -2.77688761
## ptratio  0.122064688  0.19196217
## black   -0.029200518  0.06353722
## lstat    0.085030308  0.12666624
## medv     0.157444662 -0.10356584
## 
## Proportion of trace:
##    LD1    LD2 
## 0.9812 0.0188

Drawing the LDA (bi)plot

lda.arrows <- function(x, myscale = 1, arrow_heads = 0.1, color = "red", tex = 0.75, choices = c(1,2)){
  heads <- coef(x)
  arrows(x0 = 0, y0 = 0, 
         x1 = myscale * heads[,choices[1]], 
         y1 = myscale * heads[,choices[2]], col=color, length = arrow_heads)
  text(myscale * heads[,choices], labels = row.names(heads), 
       cex = tex, col=color, pos=3)
}
classes<-as.numeric(km$cluster)
plot(lda.fit, col=classes, dimen = 2)
lda.arrows(lda.fit, myscale = 1)

tax and rad are the most influential linear separators for the clusters.

Super-bonus exercise

lda.fit <- lda(crime~zn+indus+chas+nox+rm+age+dis+rad+tax+ptratio+black+lstat+medv, data = train)
lda.fit
## Call:
## lda(crime ~ zn + indus + chas + nox + rm + age + dis + rad + 
##     tax + ptratio + black + lstat + medv, data = train)
## 
## Prior probabilities of groups:
##       low   med_low  med_high      high 
## 0.2698020 0.2277228 0.2450495 0.2574257 
## 
## Group means:
##                  zn      indus        chas        nox          rm
## low       0.9290821 -0.9043974 -0.12784833 -0.8811777  0.45136424
## med_low  -0.0449532 -0.3522661 -0.01556166 -0.6080292 -0.14858057
## med_high -0.4075493  0.1798342  0.20489520  0.3998634  0.07224354
## high     -0.4872402  1.0170690 -0.08304540  1.0894043 -0.52326309
##                 age        dis        rad        tax    ptratio
## low      -0.8799431  0.8757234 -0.6836912 -0.7390553 -0.4545029
## med_low  -0.3811610  0.4085716 -0.5412094 -0.4611175 -0.1295800
## med_high  0.4411915 -0.3657406 -0.4435998 -0.3185150 -0.2500720
## high      0.8155168 -0.8564699  1.6386213  1.5144083  0.7813507
##                black       lstat        medv
## low       0.37709554 -0.76762917  0.52591755
## med_low   0.31030831 -0.11933711  0.01345156
## med_high  0.05261903  0.05490616  0.13360834
## high     -0.83537529  0.92160860 -0.77847650
## 
## Coefficients of linear discriminants:
##                 LD1         LD2        LD3
## zn       0.08953054  0.56113425 -1.0101364
## indus    0.07218634 -0.22174594  0.2397393
## chas    -0.09531663 -0.05996422  0.1169146
## nox      0.27621683 -0.79385816 -1.3189436
## rm      -0.13342457 -0.11478356 -0.2125507
## age      0.22966338 -0.41609631 -0.1325351
## dis     -0.02814771 -0.22710599  0.2165613
## rad      3.59273354  1.08337388 -0.2786836
## tax      0.18626877 -0.09533914  0.8325966
## ptratio  0.10340365 -0.04811054 -0.3811377
## black   -0.09074088  0.06197170  0.1196124
## lstat    0.21828270 -0.22692557  0.4064369
## medv     0.19223067 -0.42219229 -0.1267976
## 
## Proportion of trace:
##    LD1    LD2    LD3 
## 0.9580 0.0322 0.0098
model_predictors <- dplyr::select(train, -crime)

Checking the dimensions

dim(model_predictors)
## [1] 404  13
dim(lda.fit$scaling)
## [1] 13  3


Matrix multiplication

matrix_product <- as.matrix(model_predictors) %*% lda.fit$scaling
matrix_product <- as.data.frame(matrix_product)
library(plotly)
## 
## Attaching package: 'plotly'
## The following object is masked from 'package:MASS':
## 
##     select
## The following object is masked from 'package:ggplot2':
## 
##     last_plot
## The following object is masked from 'package:stats':
## 
##     filter
## The following object is masked from 'package:graphics':
## 
##     layout
plot_ly(x = matrix_product$LD1, y = matrix_product$LD2, z = matrix_product$LD3, type= 'scatter3d', mode='markers', color=train$crime)


5. Reading the human data

human2 <- read.table("http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/human2.txt", header = TRUE, sep = ",")

5.1 Graphical overview of the data

library(ggplot2)
library(GGally)
ggpairs(human2, lower = list(combo = wrap("facethist", bins = 20)))

Correlations among the 8 variables can be seen in the ggpairs plot above. It is noticeable that the strongest positive correlation is between life expectancy at birth (Life.Exp) and expected years of schooling (Edu.Exp), whereas the strongest negative correlation is between the maternal mortality ratio (Mat.Mor) and life expectancy at birth. These can also be checked numerically, as sketched below.
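
A quick numeric check of the pairwise correlations (a sketch):

round(cor(human2), 2)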

5.1 Summaries of the variables in the human data

summary(human2)
##     Edu2.FM          Labo.FM          Edu.Exp         Life.Exp    
##  Min.   :0.1717   Min.   :0.1857   Min.   : 5.40   Min.   :49.00  
##  1st Qu.:0.7264   1st Qu.:0.5984   1st Qu.:11.25   1st Qu.:66.30  
##  Median :0.9375   Median :0.7535   Median :13.50   Median :74.20  
##  Mean   :0.8529   Mean   :0.7074   Mean   :13.18   Mean   :71.65  
##  3rd Qu.:0.9968   3rd Qu.:0.8535   3rd Qu.:15.20   3rd Qu.:77.25  
##  Max.   :1.4967   Max.   :1.0380   Max.   :20.20   Max.   :83.50  
##       GNI            Mat.Mor         Ado.Birth         Parli.F     
##  Min.   :   581   Min.   :   1.0   Min.   :  0.60   Min.   : 0.00  
##  1st Qu.:  4198   1st Qu.:  11.5   1st Qu.: 12.65   1st Qu.:12.40  
##  Median : 12040   Median :  49.0   Median : 33.60   Median :19.30  
##  Mean   : 17628   Mean   : 149.1   Mean   : 47.16   Mean   :20.91  
##  3rd Qu.: 24512   3rd Qu.: 190.0   3rd Qu.: 71.95   3rd Qu.:27.95  
##  Max.   :123124   Max.   :1100.0   Max.   :204.80   Max.   :57.50

5.2 Principal component analysis on the non-standardised human data

library(ggfortify)
pca_human2 <- prcomp(human2)
summary(pca_human2)
## Importance of components:
##                              PC1      PC2   PC3   PC4   PC5   PC6    PC7
## Standard deviation     1.854e+04 185.5219 25.19 11.45 3.766 1.566 0.1912
## Proportion of Variance 9.999e-01   0.0001  0.00  0.00 0.000 0.000 0.0000
## Cumulative Proportion  9.999e-01   1.0000  1.00  1.00 1.000 1.000 1.0000
##                           PC8
## Standard deviation     0.1591
## Proportion of Variance 0.0000
## Cumulative Proportion  1.0000

PCA yields 8 principal components, PC1-PC8, each explaining a share of the total variation in the dataset. On the non-standardised data PC1 explains 99.99% of the total variance, meaning that nearly all of the information in the 8 variables is captured by that single component, while PC2 explains only about 0.01%. This happens because GNI has a far larger scale than the other variables, so it dominates the unstandardised covariance structure.
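
The proportions of variance reported by summary() can be derived directly from the component standard deviations (a sketch):

# share of variance per component = sdev^2 / total variance
round(pca_human2$sdev^2 / sum(pca_human2$sdev^2), 4)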

5.2 Biplot displaying the observations by the first two principal components

biplot(pca_human2, choices = 1:2, cex = c(0.8, 1), col = c("grey40", "deeppink2"))
## Warning in arrows(0, 0, y[, 1L] * 0.8, y[, 2L] * 0.8, col = col[2L], length
## = arrow.len): zero-length arrow is of indeterminate angle and so skipped
## (this warning is repeated five times)

5.3 Standardizing the variables in the human data

human_std <- scale(human2)
summary(human_std)
##     Edu2.FM           Labo.FM           Edu.Exp           Life.Exp      
##  Min.   :-2.8189   Min.   :-2.6247   Min.   :-2.7378   Min.   :-2.7188  
##  1st Qu.:-0.5233   1st Qu.:-0.5484   1st Qu.:-0.6782   1st Qu.:-0.6425  
##  Median : 0.3503   Median : 0.2316   Median : 0.1140   Median : 0.3056  
##  Mean   : 0.0000   Mean   : 0.0000   Mean   : 0.0000   Mean   : 0.0000  
##  3rd Qu.: 0.5958   3rd Qu.: 0.7350   3rd Qu.: 0.7126   3rd Qu.: 0.6717  
##  Max.   : 2.6646   Max.   : 1.6632   Max.   : 2.4730   Max.   : 1.4218  
##       GNI             Mat.Mor          Ado.Birth          Parli.F       
##  Min.   :-0.9193   Min.   :-0.6992   Min.   :-1.1325   Min.   :-1.8203  
##  1st Qu.:-0.7243   1st Qu.:-0.6496   1st Qu.:-0.8394   1st Qu.:-0.7409  
##  Median :-0.3013   Median :-0.4726   Median :-0.3298   Median :-0.1403  
##  Mean   : 0.0000   Mean   : 0.0000   Mean   : 0.0000   Mean   : 0.0000  
##  3rd Qu.: 0.3712   3rd Qu.: 0.1932   3rd Qu.: 0.6030   3rd Qu.: 0.6127  
##  Max.   : 5.6890   Max.   : 4.4899   Max.   : 3.8344   Max.   : 3.1850

Scaling the human data standardises each variable: all the variable means become zero (and the standard deviations one), as the sketch below illustrates.
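
scale() standardises column-wise as (x - mean(x)) / sd(x); a quick check on one variable (a sketch):

gni_manual <- (human2$GNI - mean(human2$GNI)) / sd(human2$GNI)
all.equal(as.numeric(human_std[, "GNI"]), gni_manual)  # should be TRUE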

5.3 Biplot displaying the observations by the first two principal components in the standardized data

pca_human <- prcomp(human_std)
biplot(pca_human, choices = 1:2, cex=c(0.8,1), col=c("grey40", "deeppink2"))

5.4 Biplot with the percentages of variance captured by the first two principal components in the standardized data

s <- summary(pca_human)
s
## Importance of components:
##                           PC1    PC2     PC3     PC4     PC5     PC6
## Standard deviation     2.0708 1.1397 0.87505 0.77886 0.66196 0.53631
## Proportion of Variance 0.5361 0.1624 0.09571 0.07583 0.05477 0.03595
## Cumulative Proportion  0.5361 0.6984 0.79413 0.86996 0.92473 0.96069
##                            PC7     PC8
## Standard deviation     0.45900 0.32224
## Proportion of Variance 0.02634 0.01298
## Cumulative Proportion  0.98702 1.00000
pca_pr <- round(100*s$importance[2,], digits = 1)
pca_pr
##  PC1  PC2  PC3  PC4  PC5  PC6  PC7  PC8 
## 53.6 16.2  9.6  7.6  5.5  3.6  2.6  1.3
pc_lab <- paste0(names(pca_pr), " (", pca_pr, "%)")
biplot(pca_human, cex = c(0.8, 1), col = c("grey40", "deeppink2"), xlab = pc_lab[1], ylab = pc_lab[2])

The standardised data gives a much better idea of the variability. The maternal mortality ratio and the adolescent birth rate contribute most to PC1, which explains 53.6% of the variation, while the F/M ratio in the labour force and the percentage of female representatives in parliament contribute most to PC2, which explains 16.2% of the variation. Additionally, the maternal mortality ratio and the adolescent birth rate seem to be strongly correlated, and the F/M ratio in the labour force and the percentage of female representatives in parliament also seem to correlate positively.

5.5 Tea data analysis

library(FactoMineR)
library(dplyr)
tea <- read.table("http://factominer.free.fr/factomethods/datasets/tea.txt", header = TRUE, sep = "\t")
str(tea)
## 'data.frame':    300 obs. of  36 variables:
##  $ breakfast       : Factor w/ 2 levels "breakfast","Not.breakfast": 1 1 2 2 1 2 1 2 1 1 ...
##  $ tea.time        : Factor w/ 2 levels "Not.tea time",..: 1 1 2 1 1 1 2 2 2 1 ...
##  $ evening         : Factor w/ 2 levels "evening","Not.evening": 2 2 1 2 1 2 2 1 2 1 ...
##  $ lunch           : Factor w/ 2 levels "lunch","Not.lunch": 2 2 2 2 2 2 2 2 2 2 ...
##  $ dinner          : Factor w/ 2 levels "dinner","Not.dinner": 2 2 1 1 2 1 2 2 2 2 ...
##  $ always          : Factor w/ 2 levels "always","Not.always": 2 2 2 2 1 2 2 2 2 2 ...
##  $ home            : Factor w/ 2 levels "home","Not.home": 1 1 1 1 1 1 1 1 1 1 ...
##  $ work            : Factor w/ 2 levels "Not.work","work": 1 1 2 1 1 1 1 1 1 1 ...
##  $ tearoom         : Factor w/ 2 levels "Not.tearoom",..: 1 1 1 1 1 1 1 1 1 2 ...
##  $ friends         : Factor w/ 2 levels "friends","Not.friends": 2 2 1 2 2 2 1 2 2 2 ...
##  $ resto           : Factor w/ 2 levels "Not.resto","resto": 1 1 2 1 1 1 1 1 1 1 ...
##  $ pub             : Factor w/ 2 levels "Not.pub","pub": 1 1 1 1 1 1 1 1 1 1 ...
##  $ Tea             : Factor w/ 3 levels "black","Earl Grey",..: 1 1 2 2 2 2 2 1 2 1 ...
##  $ How             : Factor w/ 4 levels "alone","lemon",..: 1 3 1 1 1 1 1 3 3 1 ...
##  $ sugar           : Factor w/ 2 levels "No.sugar","sugar": 2 1 1 2 1 1 1 1 1 1 ...
##  $ how             : Factor w/ 3 levels "tea bag","tea bag+unpackaged",..: 1 1 1 1 1 1 1 1 2 2 ...
##  $ where           : Factor w/ 3 levels "chain store",..: 1 1 1 1 1 1 1 1 2 2 ...
##  $ price           : Factor w/ 6 levels "p_branded","p_cheap",..: 4 6 6 6 6 3 6 6 5 5 ...
##  $ age             : int  39 45 47 23 48 21 37 36 40 37 ...
##  $ sex             : Factor w/ 2 levels "F","M": 2 1 1 2 2 2 2 1 2 2 ...
##  $ SPC             : Factor w/ 7 levels "employee","middle",..: 2 2 4 6 1 6 5 2 5 5 ...
##  $ Sport           : Factor w/ 2 levels "Not.sportsman",..: 2 2 2 1 2 2 2 2 2 1 ...
##  $ age_Q           : Factor w/ 5 levels "+60","15-24",..: 4 5 5 2 5 2 4 4 4 4 ...
##  $ frequency       : Factor w/ 4 levels "+2/day","1 to 2/week",..: 3 3 1 3 1 3 4 2 1 1 ...
##  $ escape.exoticism: Factor w/ 2 levels "escape-exoticism",..: 2 1 2 1 1 2 2 2 2 2 ...
##  $ spirituality    : Factor w/ 2 levels "Not.spirituality",..: 1 1 1 2 2 1 1 1 1 1 ...
##  $ healthy         : Factor w/ 2 levels "healthy","Not.healthy": 1 1 1 1 2 1 1 1 2 1 ...
##  $ diuretic        : Factor w/ 2 levels "diuretic","Not.diuretic": 2 1 1 2 1 2 2 2 2 1 ...
##  $ friendliness    : Factor w/ 2 levels "friendliness",..: 2 2 1 2 1 2 2 1 2 1 ...
##  $ iron.absorption : Factor w/ 2 levels "iron absorption",..: 2 2 2 2 2 2 2 2 2 2 ...
##  $ feminine        : Factor w/ 2 levels "feminine","Not.feminine": 2 2 2 2 2 2 2 1 2 2 ...
##  $ sophisticated   : Factor w/ 2 levels "Not.sophisticated",..: 1 1 1 2 1 1 1 2 2 1 ...
##  $ slimming        : Factor w/ 2 levels "No.slimming",..: 1 1 1 1 1 1 1 1 1 1 ...
##  $ exciting        : Factor w/ 2 levels "exciting","No.exciting": 2 1 2 2 2 2 2 2 2 2 ...
##  $ relaxing        : Factor w/ 2 levels "No.relaxing",..: 1 1 2 2 2 2 2 2 2 2 ...
##  $ effect.on.health: Factor w/ 2 levels "effect on health",..: 2 2 2 2 2 2 2 2 2 2 ...
dim(tea)
## [1] 300  36
glimpse(tea)
## Observations: 300
## Variables: 36
## $ breakfast        <fct> breakfast, breakfast, Not.breakfast, Not.brea...
## $ tea.time         <fct> Not.tea time, Not.tea time, tea time, Not.tea...
## $ evening          <fct> Not.evening, Not.evening, evening, Not.evenin...
## $ lunch            <fct> Not.lunch, Not.lunch, Not.lunch, Not.lunch, N...
## $ dinner           <fct> Not.dinner, Not.dinner, dinner, dinner, Not.d...
## $ always           <fct> Not.always, Not.always, Not.always, Not.alway...
## $ home             <fct> home, home, home, home, home, home, home, hom...
## $ work             <fct> Not.work, Not.work, work, Not.work, Not.work,...
## $ tearoom          <fct> Not.tearoom, Not.tearoom, Not.tearoom, Not.te...
## $ friends          <fct> Not.friends, Not.friends, friends, Not.friend...
## $ resto            <fct> Not.resto, Not.resto, resto, Not.resto, Not.r...
## $ pub              <fct> Not.pub, Not.pub, Not.pub, Not.pub, Not.pub, ...
## $ Tea              <fct> black, black, Earl Grey, Earl Grey, Earl Grey...
## $ How              <fct> alone, milk, alone, alone, alone, alone, alon...
## $ sugar            <fct> sugar, No.sugar, No.sugar, sugar, No.sugar, N...
## $ how              <fct> tea bag, tea bag, tea bag, tea bag, tea bag, ...
## $ where            <fct> chain store, chain store, chain store, chain ...
## $ price            <fct> p_unknown, p_variable, p_variable, p_variable...
## $ age              <int> 39, 45, 47, 23, 48, 21, 37, 36, 40, 37, 32, 3...
## $ sex              <fct> M, F, F, M, M, M, M, F, M, M, M, M, M, M, M, ...
## $ SPC              <fct> middle, middle, other worker, student, employ...
## $ Sport            <fct> sportsman, sportsman, sportsman, Not.sportsma...
## $ age_Q            <fct> 35-44, 45-59, 45-59, 15-24, 45-59, 15-24, 35-...
## $ frequency        <fct> 1/day, 1/day, +2/day, 1/day, +2/day, 1/day, 3...
## $ escape.exoticism <fct> Not.escape-exoticism, escape-exoticism, Not.e...
## $ spirituality     <fct> Not.spirituality, Not.spirituality, Not.spiri...
## $ healthy          <fct> healthy, healthy, healthy, healthy, Not.healt...
## $ diuretic         <fct> Not.diuretic, diuretic, diuretic, Not.diureti...
## $ friendliness     <fct> Not.friendliness, Not.friendliness, friendlin...
## $ iron.absorption  <fct> Not.iron absorption, Not.iron absorption, Not...
## $ feminine         <fct> Not.feminine, Not.feminine, Not.feminine, Not...
## $ sophisticated    <fct> Not.sophisticated, Not.sophisticated, Not.sop...
## $ slimming         <fct> No.slimming, No.slimming, No.slimming, No.sli...
## $ exciting         <fct> No.exciting, exciting, No.exciting, No.exciti...
## $ relaxing         <fct> No.relaxing, No.relaxing, relaxing, relaxing,...
## $ effect.on.health <fct> No.effect on health, No.effect on health, No....

The tea dataset contains 300 observations (tea consumers) of 36 variables, the answers to a survey about tea consumption: how the respondents consume tea, how they perceive tea, and some descriptive variables (sex, age, socio-professional category and sport practice). Except for age, all the variables are categorical; age appears twice, once as a continuous and once as a categorical variable.

library(tidyverse)
## -- Attaching packages ----------------------------------------------------------------------------------------------------- tidyverse 1.2.1 --
## v tibble  2.1.3     v purrr   0.3.3
## v tidyr   1.0.0     v stringr 1.4.0
## v readr   1.3.1     v forcats 0.4.0
## -- Conflicts -------------------------------------------------------------------------------------------------------- tidyverse_conflicts() --
## x plotly::filter() masks dplyr::filter(), stats::filter()
## x dplyr::lag()     masks stats::lag()
## x plotly::select() masks MASS::select(), dplyr::select()
keep_columns <- c("Tea", "How", "how", "sugar", "where", "lunch")
tea_time <- dplyr::select(tea, one_of(keep_columns))  # dplyr:: guards against the select() masking reported above
summary(tea_time)
##         Tea         How                      how           sugar    
##  black    : 74   alone:195   tea bag           :170   No.sugar:155  
##  Earl Grey:193   lemon: 33   tea bag+unpackaged: 94   sugar   :145  
##  green    : 33   milk : 63   unpackaged        : 36                 
##                  other:  9                                          
##                   where           lunch    
##  chain store         :192   lunch    : 44  
##  chain store+tea shop: 78   Not.lunch:256  
##  tea shop            : 30                  
## 
str(tea_time)
## 'data.frame':    300 obs. of  6 variables:
##  $ Tea  : Factor w/ 3 levels "black","Earl Grey",..: 1 1 2 2 2 2 2 1 2 1 ...
##  $ How  : Factor w/ 4 levels "alone","lemon",..: 1 3 1 1 1 1 1 3 3 1 ...
##  $ how  : Factor w/ 3 levels "tea bag","tea bag+unpackaged",..: 1 1 1 1 1 1 1 1 2 2 ...
##  $ sugar: Factor w/ 2 levels "No.sugar","sugar": 2 1 1 2 1 1 1 1 1 1 ...
##  $ where: Factor w/ 3 levels "chain store",..: 1 1 1 1 1 1 1 1 2 2 ...
##  $ lunch: Factor w/ 2 levels "lunch","Not.lunch": 2 2 2 2 2 2 2 2 2 2 ...
gather(tea_time) %>% ggplot(aes(value)) + facet_wrap("key", scales = "free") + geom_bar() + theme(axis.text.x = element_text(angle = 45, hjust = 1, size = 8))
## Warning: attributes are not identical across measure variables;
## they will be dropped

The tea data was reduced to 300 observations of 6 variables.

5.5 Multiple correspondence analysis on the tea data

mca <- MCA(tea_time, graph = FALSE)
summary(mca)
## 
## Call:
## MCA(X = tea_time, graph = FALSE) 
## 
## 
## Eigenvalues
##                        Dim.1   Dim.2   Dim.3   Dim.4   Dim.5   Dim.6
## Variance               0.279   0.261   0.219   0.189   0.177   0.156
## % of var.             15.238  14.232  11.964  10.333   9.667   8.519
## Cumulative % of var.  15.238  29.471  41.435  51.768  61.434  69.953
##                        Dim.7   Dim.8   Dim.9  Dim.10  Dim.11
## Variance               0.144   0.141   0.117   0.087   0.062
## % of var.              7.841   7.705   6.392   4.724   3.385
## Cumulative % of var.  77.794  85.500  91.891  96.615 100.000
## 
## Individuals (the 10 first)
##                       Dim.1    ctr   cos2    Dim.2    ctr   cos2    Dim.3
## 1                  | -0.298  0.106  0.086 | -0.328  0.137  0.105 | -0.327
## 2                  | -0.237  0.067  0.036 | -0.136  0.024  0.012 | -0.695
## 3                  | -0.369  0.162  0.231 | -0.300  0.115  0.153 | -0.202
## 4                  | -0.530  0.335  0.460 | -0.318  0.129  0.166 |  0.211
## 5                  | -0.369  0.162  0.231 | -0.300  0.115  0.153 | -0.202
## 6                  | -0.369  0.162  0.231 | -0.300  0.115  0.153 | -0.202
## 7                  | -0.369  0.162  0.231 | -0.300  0.115  0.153 | -0.202
## 8                  | -0.237  0.067  0.036 | -0.136  0.024  0.012 | -0.695
## 9                  |  0.143  0.024  0.012 |  0.871  0.969  0.435 | -0.067
## 10                 |  0.476  0.271  0.140 |  0.687  0.604  0.291 | -0.650
##                       ctr   cos2  
## 1                   0.163  0.104 |
## 2                   0.735  0.314 |
## 3                   0.062  0.069 |
## 4                   0.068  0.073 |
## 5                   0.062  0.069 |
## 6                   0.062  0.069 |
## 7                   0.062  0.069 |
## 8                   0.735  0.314 |
## 9                   0.007  0.003 |
## 10                  0.643  0.261 |
## 
## Categories (the 10 first)
##                        Dim.1     ctr    cos2  v.test     Dim.2     ctr
## black              |   0.473   3.288   0.073   4.677 |   0.094   0.139
## Earl Grey          |  -0.264   2.680   0.126  -6.137 |   0.123   0.626
## green              |   0.486   1.547   0.029   2.952 |  -0.933   6.111
## alone              |  -0.018   0.012   0.001  -0.418 |  -0.262   2.841
## lemon              |   0.669   2.938   0.055   4.068 |   0.531   1.979
## milk               |  -0.337   1.420   0.030  -3.002 |   0.272   0.990
## other              |   0.288   0.148   0.003   0.876 |   1.820   6.347
## tea bag            |  -0.608  12.499   0.483 -12.023 |  -0.351   4.459
## tea bag+unpackaged |   0.350   2.289   0.056   4.088 |   1.024  20.968
## unpackaged         |   1.958  27.432   0.523  12.499 |  -1.015   7.898
##                       cos2  v.test     Dim.3     ctr    cos2  v.test  
## black                0.003   0.929 |  -1.081  21.888   0.382 -10.692 |
## Earl Grey            0.027   2.867 |   0.433   9.160   0.338  10.053 |
## green                0.107  -5.669 |  -0.108   0.098   0.001  -0.659 |
## alone                0.127  -6.164 |  -0.113   0.627   0.024  -2.655 |
## lemon                0.035   3.226 |   1.329  14.771   0.218   8.081 |
## milk                 0.020   2.422 |   0.013   0.003   0.000   0.116 |
## other                0.102   5.534 |  -2.524  14.526   0.197  -7.676 |
## tea bag              0.161  -6.941 |  -0.065   0.183   0.006  -1.287 |
## tea bag+unpackaged   0.478  11.956 |   0.019   0.009   0.000   0.226 |
## unpackaged           0.141  -6.482 |   0.257   0.602   0.009   1.640 |
## 
## Categorical variables (eta2)
##                      Dim.1 Dim.2 Dim.3  
## Tea                | 0.126 0.108 0.410 |
## How                | 0.076 0.190 0.394 |
## how                | 0.708 0.522 0.010 |
## sugar              | 0.065 0.001 0.336 |
## where              | 0.702 0.681 0.055 |
## lunch              | 0.000 0.064 0.111 |

==================== Dimension 1 explains the largest share of the variance (about 15.2%), as shown by the MCA eigenvalues above. ====================

5.6 Plotting the MCA results

plot(mca, invisible=c("ind"), habillage = "quali")

==================== In the MCA plot the distance between variable categories gives a measure of their similarity. For example, "tea bag" and "chain store" lie close together, while "other" sits apart from all the remaining categories. ====================
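
As a cross-check of this reading, the same category map can be drawn with the factoextra package (also used for the scree plot below); a minimal sketch, assuming the mca object fitted above:

library(factoextra)

# Category map coloured by cos2 (quality of representation on dimensions 1-2);
# categories plotted close together are chosen by similar sets of respondents
fviz_mca_var(mca, col.var = "cos2", repel = TRUE)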

5.7 Scree plot of the MCA analysis

library(factoextra)
## Welcome! Related Books: `Practical Guide To Cluster Analysis in R` at https://goo.gl/13EFCZ
get_eig(mca)
##        eigenvalue variance.percent cumulative.variance.percent
## Dim.1  0.27937118        15.238428                    15.23843
## Dim.2  0.26092645        14.232352                    29.47078
## Dim.3  0.21933575        11.963768                    41.43455
## Dim.4  0.18943794        10.332978                    51.76753
## Dim.5  0.17722310         9.666715                    61.43424
## Dim.6  0.15617745         8.518770                    69.95301
## Dim.7  0.14375727         7.841306                    77.79432
## Dim.8  0.14126310         7.705260                    85.49958
## Dim.9  0.11717818         6.391537                    91.89111
## Dim.10 0.08660997         4.724180                    96.61529
## Dim.11 0.06205294         3.384706                   100.00000
fviz_screeplot(mca,addlabels = TRUE, ylim = c(0, 50))

==================== Dimension 1 explains about 15.2% of the variance, as can be seen in the scree plot; no single dimension dominates. ====================
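
If a rule of thumb is needed for how many dimensions to retain, the cumulative percentages returned by get_eig() can be queried directly; a small sketch:

# Index of the first dimension whose cumulative variance reaches 70%
eig <- get_eig(mca)
which(eig[, "cumulative.variance.percent"] >= 70)[1]  # Dim.7 in this data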



PART I

6.1.1 Reading RATS data

RATS <- read.table("https://raw.githubusercontent.com/KimmoVehkalahti/MABS/master/Examples/data/rats.txt", header = TRUE, sep = '\t')
library(dplyr)
library(tidyr)
library(ggplot2)
glimpse(RATS)
## Observations: 16
## Variables: 13
## $ ID    <int> 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
## $ Group <int> 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3
## $ WD1   <int> 240, 225, 245, 260, 255, 260, 275, 245, 410, 405, 445, 5...
## $ WD8   <int> 250, 230, 250, 255, 260, 265, 275, 255, 415, 420, 445, 5...
## $ WD15  <int> 255, 230, 250, 255, 255, 270, 260, 260, 425, 430, 450, 5...
## $ WD22  <int> 260, 232, 255, 265, 270, 275, 270, 268, 428, 440, 452, 5...
## $ WD29  <int> 262, 240, 262, 265, 270, 275, 273, 270, 438, 448, 455, 5...
## $ WD36  <int> 258, 240, 265, 268, 273, 277, 274, 265, 443, 460, 455, 5...
## $ WD43  <int> 266, 243, 267, 270, 274, 278, 276, 265, 442, 458, 451, 5...
## $ WD44  <int> 266, 244, 267, 272, 273, 278, 271, 267, 446, 464, 450, 5...
## $ WD50  <int> 265, 238, 264, 274, 276, 284, 282, 273, 456, 475, 462, 6...
## $ WD57  <int> 272, 247, 268, 273, 278, 279, 281, 274, 468, 484, 466, 6...
## $ WD64  <int> 278, 245, 269, 275, 280, 281, 284, 278, 478, 496, 472, 6...
summary(RATS)
##        ID            Group           WD1             WD8       
##  Min.   : 1.00   Min.   :1.00   Min.   :225.0   Min.   :230.0  
##  1st Qu.: 4.75   1st Qu.:1.00   1st Qu.:252.5   1st Qu.:255.0  
##  Median : 8.50   Median :1.50   Median :340.0   Median :345.0  
##  Mean   : 8.50   Mean   :1.75   Mean   :365.9   Mean   :369.1  
##  3rd Qu.:12.25   3rd Qu.:2.25   3rd Qu.:480.0   3rd Qu.:476.2  
##  Max.   :16.00   Max.   :3.00   Max.   :555.0   Max.   :560.0  
##       WD15            WD22            WD29            WD36      
##  Min.   :230.0   Min.   :232.0   Min.   :240.0   Min.   :240.0  
##  1st Qu.:255.0   1st Qu.:267.2   1st Qu.:268.8   1st Qu.:267.2  
##  Median :347.5   Median :351.5   Median :356.5   Median :360.0  
##  Mean   :372.5   Mean   :379.2   Mean   :383.9   Mean   :387.0  
##  3rd Qu.:486.2   3rd Qu.:492.5   3rd Qu.:497.8   3rd Qu.:504.2  
##  Max.   :565.0   Max.   :580.0   Max.   :590.0   Max.   :597.0  
##       WD43            WD44            WD50            WD57      
##  Min.   :243.0   Min.   :244.0   Min.   :238.0   Min.   :247.0  
##  1st Qu.:269.2   1st Qu.:270.0   1st Qu.:273.8   1st Qu.:273.8  
##  Median :360.0   Median :362.0   Median :370.0   Median :373.5  
##  Mean   :386.0   Mean   :388.3   Mean   :394.6   Mean   :398.6  
##  3rd Qu.:501.0   3rd Qu.:510.5   3rd Qu.:516.0   3rd Qu.:524.5  
##  Max.   :595.0   Max.   :595.0   Max.   :612.0   Max.   :618.0  
##       WD64      
##  Min.   :245.0  
##  1st Qu.:278.0  
##  Median :378.0  
##  Mean   :404.1  
##  3rd Qu.:530.8  
##  Max.   :628.0

6.1.2 Factoring the categorical variables in RATS data

RATS$ID <- factor(RATS$ID)
RATS$Group <- factor(RATS$Group)
glimpse(RATS)
## Observations: 16
## Variables: 13
## $ ID    <fct> 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
## $ Group <fct> 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3
## $ WD1   <int> 240, 225, 245, 260, 255, 260, 275, 245, 410, 405, 445, 5...
## $ WD8   <int> 250, 230, 250, 255, 260, 265, 275, 255, 415, 420, 445, 5...
## $ WD15  <int> 255, 230, 250, 255, 255, 270, 260, 260, 425, 430, 450, 5...
## $ WD22  <int> 260, 232, 255, 265, 270, 275, 270, 268, 428, 440, 452, 5...
## $ WD29  <int> 262, 240, 262, 265, 270, 275, 273, 270, 438, 448, 455, 5...
## $ WD36  <int> 258, 240, 265, 268, 273, 277, 274, 265, 443, 460, 455, 5...
## $ WD43  <int> 266, 243, 267, 270, 274, 278, 276, 265, 442, 458, 451, 5...
## $ WD44  <int> 266, 244, 267, 272, 273, 278, 271, 267, 446, 464, 450, 5...
## $ WD50  <int> 265, 238, 264, 274, 276, 284, 282, 273, 456, 475, 462, 6...
## $ WD57  <int> 272, 247, 268, 273, 278, 279, 281, 274, 468, 484, 466, 6...
## $ WD64  <int> 278, 245, 269, 275, 280, 281, 284, 278, 478, 496, 472, 6...

6.1.3 Converting wide form to long form of RATS data

RATSL <- RATS %>%
  gather(key = WD, value = Weight, -ID, -Group) %>%
  mutate(Time = as.integer(substr(WD,3,4))) 
glimpse(RATSL)
## Observations: 176
## Variables: 5
## $ ID     <fct> 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, ...
## $ Group  <fct> 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1...
## $ WD     <chr> "WD1", "WD1", "WD1", "WD1", "WD1", "WD1", "WD1", "WD1",...
## $ Weight <int> 240, 225, 245, 260, 255, 260, 275, 245, 410, 405, 445, ...
## $ Time   <int> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 8, 8, 8...
summary(RATSL)
##        ID      Group       WD                Weight           Time      
##  1      : 11   1:88   Length:176         Min.   :225.0   Min.   : 1.00  
##  2      : 11   2:44   Class :character   1st Qu.:267.0   1st Qu.:15.00  
##  3      : 11   3:44   Mode  :character   Median :344.5   Median :36.00  
##  4      : 11                             Mean   :384.5   Mean   :33.55  
##  5      : 11                             3rd Qu.:511.2   3rd Qu.:50.00  
##  6      : 11                             Max.   :628.0   Max.   :64.00  
##  (Other):110

==================== The RATS data come from a nutrition study conducted on three groups of rats. The groups were put on different diets, and each animal's body weight (grams) was recorded repeatedly, approximately weekly (except in week seven, when two recordings were taken), over a 9-week period. In its original wide format the data contain 16 observations of 13 variables ("ID", "Group", "WD1", "WD8", "WD15", "WD22", "WD29", "WD36", "WD43", "WD44", "WD50", "WD57", "WD64"), with each rat's repeated weight measurements in a single row and the measurement days in columns. In the long form the data have 176 observations of 5 variables (ID, Group, WD, Weight, Time); each row represents one time point per rat, so each rat has data in multiple rows. ====================
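
The same wide-to-long conversion can be written with tidyr's newer pivot_longer(), which supersedes gather(); a sketch, assuming tidyr >= 1.0.0 (the version loaded above):

# Equivalent conversion of RATS to long form with pivot_longer()
RATSL_alt <- RATS %>%
  pivot_longer(cols = starts_with("WD"),
               names_to = "WD", values_to = "Weight") %>%
  mutate(Time = as.integer(substr(WD, 3, 4)))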

6.1.4 Plotting the RATSL data

ggplot(RATSL, aes(x = Time, y = Weight, linetype = ID)) +
  geom_line() +
  scale_linetype_manual(values = rep(1:16, times=4)) +
  facet_grid(. ~ Group, labeller = label_both) +
  theme(legend.position = "none") + 
  scale_y_continuous(limits = c(min(RATSL$Weight), max(RATSL$Weight)))

6.1.5 Plotting the RATSL data by Group

ggplot(RATSL, aes(x = Time, y = Weight, group = ID)) +
  geom_line(aes(linetype = Group))+
  scale_x_continuous(name = "Time (days)", breaks = seq(0, 60, 10))+
  scale_y_continuous(name = "Weight (grams)")+
  theme(legend.position = "top")

==================== It is obvious from the graphs that the rats in group 1 have lower weights throughout the study period, with only a minor increase. The rats in groups 2 and 3, which are heavier at the beginning, seem to keep gaining weight over the 9 weeks of the study; this phenomenon is known as tracking. It is also apparent that there is substantial intra-group variation in groups 2 and 3. ====================

6.1.6 Standardising the Weight variable in the RATSL data

RATSL <- RATSL %>%
  group_by(Time) %>%
  mutate(stdWeight = (Weight - mean(Weight))/sd(Weight) ) %>%
  ungroup()
glimpse(RATSL)
## Observations: 176
## Variables: 6
## $ ID        <fct> 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1...
## $ Group     <fct> 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 1, 1...
## $ WD        <chr> "WD1", "WD1", "WD1", "WD1", "WD1", "WD1", "WD1", "WD...
## $ Weight    <int> 240, 225, 245, 260, 255, 260, 275, 245, 410, 405, 44...
## $ Time      <int> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 8, 8...
## $ stdWeight <dbl> -1.0011429, -1.1203857, -0.9613953, -0.8421525, -0.8...

6.1.7 Plotting again with the standardised Weight

ggplot(RATSL, aes(x = Time, y = stdWeight, linetype = ID)) +
  geom_line() +
  scale_linetype_manual(values = rep(1:16, times=4)) +
  facet_grid(. ~ Group, labeller = label_both) +
  scale_y_continuous(name = "standardized Weight")

==================== The tracking phenomenon is seen even more clearly in the plot of the standardised weights: the groups keep their relative positions, and groups 2 and 3 continue to gain weight during the study period. ====================

6.1.8 Number of measurement days, and summary data with the mean and standard error of Weight by Group and Time

# number of measurement occasions (11 weighing days); note that the usual
# standard error of a group mean would divide by the number of rats per group
n <- RATSL$Time %>% unique() %>% length()
RATSS <- RATSL %>%
  group_by(Group, Time) %>%
  summarise( mean = mean(Weight), se = sd(Weight)/sqrt(n) ) %>%
  ungroup()
glimpse(RATSS)
## Observations: 33
## Variables: 4
## $ Group <fct> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,...
## $ Time  <int> 1, 8, 15, 22, 29, 36, 43, 44, 50, 57, 64, 1, 8, 15, 22, ...
## $ mean  <dbl> 250.625, 255.000, 254.375, 261.875, 264.625, 265.000, 26...
## $ se    <dbl> 4.589478, 3.947710, 3.460116, 4.100800, 3.333956, 3.5529...

6.1.9 Plotting the mean profiles of RATSS data

ggplot(RATSS, aes(x = Time, y = mean, linetype = Group, shape = Group)) +
  geom_line() +
  scale_linetype_manual(values = c(1,2,3)) +
  geom_point(size=3) +
  scale_shape_manual(values = c(1,2,3)) +
  geom_errorbar(aes(ymin = mean - se, ymax = mean + se, linetype="1"), width=0.3) +
  theme(legend.position = c(0.8,0.8)) +
  scale_y_continuous(name = "mean(Weight) +/- se(Weight)")

==================== The mean weight profiles of the three groups are shown together with error bars indicating the variation of the weight at each time point during the study period. It is apparent that groups 2 and 3 differ considerably from group 1, both in baseline weight and in the trend of weight gain. ====================
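
The same mean profile plot can also be produced without a separate summary data frame, using ggplot2's stat_summary(); its mean_se() divides by the number of rats per group and time (8, 4 and 4) rather than by the number of measurement occasions, so the error bars differ slightly from those above. A sketch:

# Mean +/- SE profiles computed on the fly per Group and Time
# (in ggplot2 >= 3.3.0 use fun = mean instead of fun.y = mean)
ggplot(RATSL, aes(x = Time, y = Weight, linetype = Group)) +
  stat_summary(fun.data = mean_se, geom = "errorbar", width = 0.3) +
  stat_summary(fun.y = mean, geom = "line") +
  scale_y_continuous(name = "mean(Weight) +/- se(Weight)")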

6.1.10 Creating summary data by Group and ID with the mean weight as the summary variable

RATSL8S <- RATSL %>%
  # note: WD is a character column, so WD > 0 compares strings and keeps every
  # row; use filter(Time > 1) to actually drop the baseline measurement (day 1)
  filter(WD > 0) %>%
  group_by(Group, ID) %>%
  summarise( mean = mean(Weight) ) %>%
  ungroup()
glimpse(RATSL8S)
## Observations: 16
## Variables: 3
## $ Group <fct> 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3
## $ ID    <fct> 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
## $ mean  <dbl> 261.0909, 237.6364, 260.1818, 266.5455, 269.4545, 274.72...

6.1.11 Drawing a boxplot of the mean weights versus Group

ggplot(RATSL8S, aes(x = Group, y = mean)) +
  geom_boxplot() +
  stat_summary(fun.y = "mean", geom = "point", shape=23, size=4, fill = "white") +
  scale_y_continuous(name = "mean(Weight), Time 1-64")

==================== The summary measure method condenses each rat's repeated measurements into a single value, here the mean weight over the study period, that captures an essential feature of its weight gain over time. The boxplots show considerable variability within each group, and an outlier in each group inflates that variability further. ====================

6.1.12 Creating a new RATS data by filtering the outlier

RATSL8S1 <- RATSL8S %>%
  filter((mean < 300 & mean > 250 & Group ==1)
         |(mean < 550 & Group ==2)
         |(mean > 500 & Group ==3))

6.1.13 Adjusting the ggplot code and drawing the plot again with the new data

ggplot(RATSL8S1, aes(x = Group, y = mean)) +
  geom_boxplot() +
  stat_summary(fun.y = "mean", geom = "point", shape=23, size=4, fill = "white") +
  scale_y_continuous(name = "mean(Weight), Time 1-64")

==================== After removing one outlier from each group, the boxplots were drawn again. Without the outliers, the mean weights in groups 2 and 3 are clearly higher than in group 1, and there seems to be evidence of a difference in the location of the summary measure distributions between the groups. ====================
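
With three groups a two-sample t-test does not apply directly; a one-way ANOVA on the outlier-free summary data is one way to test for the location difference. A minimal sketch:

# One-way ANOVA of the mean summary measure on Group (no baseline adjustment)
anova(lm(mean ~ Group, data = RATSL8S1))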

6.1.14 Adding the baseline from the original data as a new variable to the summary data

RATSL8S2 <- RATSL8S %>%
  mutate(baseline = RATS$WD1)

6.1.15 Fitting the linear model with the mean as the response

fit <- lm(mean ~ baseline + Group , data = RATSL8S2)

6.1.16 Computing the analysis of variance table for the fitted model with anova()

anova(fit)
## Analysis of Variance Table
## 
## Response: mean
##           Df Sum Sq Mean Sq   F value    Pr(>F)    
## baseline   1 252125  252125 2237.0655 5.217e-15 ***
## Group      2    726     363    3.2219   0.07586 .  
## Residuals 12   1352     113                        
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

==================== Incorporating the baseline values into the summary measure data and computing the ANOVA table shows that the baseline weight is strongly related to the weights observed after the diets began (p < 0.001), whereas the Group effect is not significant at the 5% level (p = 0.076). So there is no strong evidence that groups 2 and 3 gained more weight than group 1 once the baseline is accounted for. ====================
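
The usual lm() residual diagnostics can be checked for this model as well; a minimal sketch:

# Residuals vs fitted values, normal Q-Q plot and residuals vs leverage
plot(fit, which = c(1, 2, 5))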

PART II

6.2.1 Reading the BPRS data

BPRS <- read.table("https://raw.githubusercontent.com/KimmoVehkalahti/MABS/master/Examples/data/BPRS.txt", sep  =" ", header = T)
names(BPRS)
##  [1] "treatment" "subject"   "week0"     "week1"     "week2"    
##  [6] "week3"     "week4"     "week5"     "week6"     "week7"    
## [11] "week8"
str(BPRS)
## 'data.frame':    40 obs. of  11 variables:
##  $ treatment: int  1 1 1 1 1 1 1 1 1 1 ...
##  $ subject  : int  1 2 3 4 5 6 7 8 9 10 ...
##  $ week0    : int  42 58 54 55 72 48 71 30 41 57 ...
##  $ week1    : int  36 68 55 77 75 43 61 36 43 51 ...
##  $ week2    : int  36 61 41 49 72 41 47 38 39 51 ...
##  $ week3    : int  43 55 38 54 65 38 30 38 35 55 ...
##  $ week4    : int  41 43 43 56 50 36 27 31 28 53 ...
##  $ week5    : int  40 34 28 50 39 29 40 26 22 43 ...
##  $ week6    : int  38 28 29 47 32 33 30 26 20 43 ...
##  $ week7    : int  47 28 25 42 38 27 31 25 23 39 ...
##  $ week8    : int  51 28 24 46 32 25 31 24 21 32 ...
summary(BPRS)
##    treatment      subject          week0           week1      
##  Min.   :1.0   Min.   : 1.00   Min.   :24.00   Min.   :23.00  
##  1st Qu.:1.0   1st Qu.: 5.75   1st Qu.:38.00   1st Qu.:35.00  
##  Median :1.5   Median :10.50   Median :46.00   Median :41.00  
##  Mean   :1.5   Mean   :10.50   Mean   :48.00   Mean   :46.33  
##  3rd Qu.:2.0   3rd Qu.:15.25   3rd Qu.:58.25   3rd Qu.:54.25  
##  Max.   :2.0   Max.   :20.00   Max.   :78.00   Max.   :95.00  
##      week2          week3           week4           week5      
##  Min.   :26.0   Min.   :24.00   Min.   :20.00   Min.   :20.00  
##  1st Qu.:32.0   1st Qu.:29.75   1st Qu.:28.00   1st Qu.:26.00  
##  Median :38.0   Median :36.50   Median :34.50   Median :30.50  
##  Mean   :41.7   Mean   :39.15   Mean   :36.35   Mean   :32.55  
##  3rd Qu.:49.0   3rd Qu.:44.50   3rd Qu.:43.00   3rd Qu.:38.00  
##  Max.   :75.0   Max.   :76.00   Max.   :66.00   Max.   :64.00  
##      week6           week7          week8      
##  Min.   :19.00   Min.   :18.0   Min.   :20.00  
##  1st Qu.:22.75   1st Qu.:23.0   1st Qu.:22.75  
##  Median :28.50   Median :30.0   Median :28.00  
##  Mean   :31.23   Mean   :32.2   Mean   :31.43  
##  3rd Qu.:37.00   3rd Qu.:38.0   3rd Qu.:35.25  
##  Max.   :64.00   Max.   :62.0   Max.   :75.00

==================== In the BPRS data, 40 male subjects were randomly assigned to one of two treatment groups, and each subject was rated on the brief psychiatric rating scale (BPRS) before treatment began (week 0) and then at weekly intervals for eight weeks. The BPRS assesses the level of 18 symptom constructs such as hostility, suspiciousness, hallucinations and grandiosity; each of these is rated from one (not present) to seven (extremely severe). The scale is used to evaluate patients suspected of having schizophrenia. ====================

6.2.2 Factoring the categorical variables in BPRS data

BPRS$treatment <- factor(BPRS$treatment)
BPRS$subject <- factor(BPRS$subject)

6.2.3 Converting wide form to long form of BPRS data

BPRSL <- BPRS %>% gather(key = weeks, value = bprs, -treatment, -subject)
# the week number is a single digit (0-8), so substr(weeks, 5, 5) extracts it
BPRSL <- BPRSL %>% mutate(week = as.integer(substr(weeks, 5, 5)))
glimpse(BPRSL)
## Observations: 360
## Variables: 5
## $ treatment <fct> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
## $ subject   <fct> 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1...
## $ weeks     <chr> "week0", "week0", "week0", "week0", "week0", "week0"...
## $ bprs      <int> 42, 58, 54, 55, 72, 48, 71, 30, 41, 57, 30, 55, 36, ...
## $ week      <int> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...

==================== In its original wide form the BPRS data have 11 variables: "treatment", "subject", "week0", "week1", "week2", "week3", "week4", "week5", "week6", "week7" and "week8"; each subject's repeated responses sit in a single row, with each response in a separate column. In the long form the variables are "treatment", "subject", "weeks", "bprs" and "week", giving 360 observations of 5 variables. Each row now represents one time point per subject, so each subject has data in multiple rows, and any variable that does not change across time (such as treatment) takes the same value in all of a subject's rows. ====================

6.2.4 Plotting the BPRSL data

ggplot(BPRSL, aes(x = week, y = bprs, linetype = subject)) +
  geom_line() +
  scale_linetype_manual(values = rep(1:10, times=4)) +
  facet_grid(. ~ treatment, labeller = label_both) +
  theme(legend.position = "none") + 
  scale_y_continuous(limits = c(min(BPRSL$bprs), max(BPRSL$bprs)))

==================== Taking the longitudinal structure of the BPRS data into account by joining together the points belonging to each subject into individual bprs profiles shows that there is substantial variation both between individuals and within individuals across time. ====================

6.2.5 Creating a regression model BPRS_reg

BPRS_reg <- lm(bprs ~ week + treatment, data = BPRSL)
summary(BPRS_reg)
## 
## Call:
## lm(formula = bprs ~ week + treatment, data = BPRSL)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -22.454  -8.965  -3.196   7.002  50.244 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  46.4539     1.3670  33.982   <2e-16 ***
## week         -2.2704     0.2524  -8.995   <2e-16 ***
## treatment2    0.5722     1.3034   0.439    0.661    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 12.37 on 357 degrees of freedom
## Multiple R-squared:  0.1851, Adjusted R-squared:  0.1806 
## F-statistic: 40.55 on 2 and 357 DF,  p-value: < 2.2e-16

==================== We fit a multiple regression model with bprs as the response and week and treatment as explanatory variables. Conditional on time, treatments 1 and 2 do not seem to differ (treatment2: p = 0.661). Note, however, that lm() treats all 360 observations as independent, which the repeated measurements on each subject clearly are not. ====================
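
That lack of independence is easy to verify from the wide-form data, where each subject's weekly measurements sit side by side; a small sketch:

# Pairwise correlations of the weekly BPRS measurements; values well above
# zero contradict the independence assumption of the lm() fit above
round(cor(BPRS[, grep("week", names(BPRS))]), 2)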

6.2.6 Creating a random intercept model for BPRSL data

library(lme4)
## Loading required package: Matrix
## 
## Attaching package: 'Matrix'
## The following objects are masked from 'package:tidyr':
## 
##     expand, pack, unpack
BPRS_ref <- lmer(bprs ~ week + treatment + (1 | subject), data = BPRSL, REML = FALSE)
print(BPRS_ref)
## Linear mixed model fit by maximum likelihood  ['lmerMod']
## Formula: bprs ~ week + treatment + (1 | subject)
##    Data: BPRSL
##       AIC       BIC    logLik  deviance  df.resid 
##  2748.712  2768.143 -1369.356  2738.712       355 
## Random effects:
##  Groups   Name        Std.Dev.
##  subject  (Intercept)  6.885  
##  Residual             10.208  
## Number of obs: 360, groups:  subject, 20
## Fixed Effects:
## (Intercept)         week   treatment2  
##     46.4539      -2.2704       0.5722

==================== We can fit a random intercept model with the same two explanatory variables, week and treatment. Fitting a random intercept model allows the linear regression fit of each subject to differ in intercept from the other subjects. ====================
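
The estimated subject-specific intercept deviations can be inspected directly with lme4's ranef(); a minimal sketch:

# Deviations of each subject's intercept from the common fixed intercept
head(ranef(BPRS_ref)$subject)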

6.2.7 creating a random intercept and random slope model for BPRSL data

BPRS_ref1 <- lmer(bprs ~ week + treatment + (week | subject), data = BPRSL, REML = FALSE)
print(BPRS_ref1)
## Linear mixed model fit by maximum likelihood  ['lmerMod']
## Formula: bprs ~ week + treatment + (week | subject)
##    Data: BPRSL
##       AIC       BIC    logLik  deviance  df.resid 
##  2745.440  2772.643 -1365.720  2731.440       353 
## Random effects:
##  Groups   Name        Std.Dev. Corr 
##  subject  (Intercept) 8.0512        
##           week        0.9803   -0.51
##  Residual             9.8707        
## Number of obs: 360, groups:  subject, 20
## Fixed Effects:
## (Intercept)         week   treatment2  
##     46.4539      -2.2704       0.5722

==================== We can also fit a random intercept and random slope model to the bprs data. Fitting a random intercept and random slope model allows the linear regression fit of each subject to differ not only in intercept but also in slope, so it accounts both for individual differences in the subjects' bprs profiles and for individual differences in the effect of time. ====================

6.2.8 Performing an ANOVA test on the two models of BPRS data

anova(BPRS_ref1, BPRS_ref)
## Data: BPRSL
## Models:
## BPRS_ref: bprs ~ week + treatment + (1 | subject)
## BPRS_ref1: bprs ~ week + treatment + (week | subject)
##           Df    AIC    BIC  logLik deviance  Chisq Chi Df Pr(>Chisq)  
## BPRS_ref   5 2748.7 2768.1 -1369.4   2738.7                           
## BPRS_ref1  7 2745.4 2772.6 -1365.7   2731.4 7.2721      2    0.02636 *
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

==================== According to the likelihood ratio test of BPRS_ref1 against BPRS_ref (chi-square = 7.27 on 2 df, p = 0.026), the random intercept and random slope model BPRS_ref1 fits the data significantly better. ====================
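
The chi-square statistic in the table is simply twice the difference in the maximised log-likelihoods, which can be checked by hand:

# Likelihood ratio statistic: twice the log-likelihood difference (about 7.27)
as.numeric(2 * (logLik(BPRS_ref1) - logLik(BPRS_ref)))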

6.2.9 Creating a random intercept and random slope model

BPRS_ref2 <- lmer(bprs ~ week * treatment + (week | subject), data = BPRSL, REML = FALSE)
summary(BPRS_ref2)
## Linear mixed model fit by maximum likelihood  ['lmerMod']
## Formula: bprs ~ week * treatment + (week | subject)
##    Data: BPRSL
## 
##      AIC      BIC   logLik deviance df.resid 
##   2744.3   2775.4  -1364.1   2728.3      352 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.0512 -0.6271 -0.0768  0.5288  3.9260 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr 
##  subject  (Intercept) 64.9964  8.0620        
##           week         0.9687  0.9842   -0.51
##  Residual             96.4707  9.8220        
## Number of obs: 360, groups:  subject, 20
## 
## Fixed effects:
##                 Estimate Std. Error t value
## (Intercept)      47.8856     2.2521  21.262
## week             -2.6283     0.3589  -7.323
## treatment2       -2.2911     1.9090  -1.200
## week:treatment2   0.7158     0.4010   1.785
## 
## Correlation of Fixed Effects:
##             (Intr) week   trtmn2
## week        -0.650              
## treatment2  -0.424  0.469       
## wek:trtmnt2  0.356 -0.559 -0.840

==================== With the random intercept and slope model fit we can assess the interaction between treatment and week. The interaction term is not significant at the 5% level (t = 1.79; the likelihood ratio test below gives p = 0.075), so the interaction model does not look like a clearly better fit for the bprs data. ====================
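
Since lmer() reports t values but no p-values, profile likelihood confidence intervals are one common way to judge the interaction; a sketch (this may take a moment to compute, and the parameter name is as printed in the summary above):

# Profile confidence interval for the week:treatment2 interaction;
# an interval covering zero suggests a non-significant interaction
confint(BPRS_ref2, parm = "week:treatment2")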

6.2.10 Performing an ANOVA test on the two models

anova(BPRS_ref2, BPRS_ref1)
## Data: BPRSL
## Models:
## BPRS_ref1: bprs ~ week + treatment + (week | subject)
## BPRS_ref2: bprs ~ week * treatment + (week | subject)
##           Df    AIC    BIC  logLik deviance  Chisq Chi Df Pr(>Chisq)  
## BPRS_ref1  7 2745.4 2772.6 -1365.7   2731.4                           
## BPRS_ref2  8 2744.3 2775.4 -1364.1   2728.3 3.1712      1    0.07495 .
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

6.2.11 Drawing the plot of the observed BPRSL values

# group = subject (rather than treatment = subject, which is not a valid
# aesthetic) joins the measurements of each subject into one line
ggplot(BPRSL, aes(x = week, y = bprs, group = subject)) +
  geom_line(aes(linetype = treatment)) +
  scale_linetype_manual(values = c(1, 2)) +
  facet_grid(. ~ treatment, labeller = label_both) +
  scale_x_continuous(name = "Time (week)") +
  scale_y_continuous(name = "Observed bprs (psychiatric rating scale)") +
  theme(legend.position = "top")

6.2.12 Creating a vector of the fitted values

Fitted <- fitted(BPRS_ref2)

6.2.13 Creating a new column fitted to BPRSL data

BPRSL <- BPRSL %>%
  mutate(Fitted)

6.2.14 Drawing the plot of the fitted BPRSL values

# plot the Fitted values from the interaction model (y = Fitted, not y = bprs)
ggplot(BPRSL, aes(x = week, y = Fitted, group = subject)) +
  geom_line(aes(linetype = treatment)) +
  scale_linetype_manual(values = c(1, 2)) +
  facet_grid(. ~ treatment, labeller = label_both) +
  scale_x_continuous(name = "Time (week)") +
  scale_y_continuous(name = "Fitted bprs (psychiatric rating scale)") +
  theme(legend.position = "top")
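
Alongside the visual comparison, the agreement between observed and fitted values can be summarised numerically; a quick sketch:

# Correlation between the observed bprs values and the fitted values from the
# interaction model (which include the estimated random effects)
cor(BPRSL$bprs, BPRSL$Fitted)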

==================== Comparing this plot with the observed profiles above, the fitted values from the interaction model track the observed bprs data reasonably well. ==================== ***